The run-queue management functions have been removed from sched-if.h and moved into the individual schedulers, so each scheduler now owns its own queue-manipulation wrappers.
#define BVT_INFO(p) ((struct bvt_dom_info *)(p)->sched_priv)
#define CPU_INFO(cpu) ((struct bvt_cpu_info *)(schedule_data[cpu]).sched_priv)
-#define RUNLIST(p) &(BVT_INFO(p)->run_list)
-#define RUNQUEUE(cpu) &(CPU_INFO(cpu)->runqueue)
+#define RUNLIST(p) ((struct list_head *)&(BVT_INFO(p)->run_list))
+#define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
#define CPU_SVT(cpu) (CPU_INFO(cpu)->svt)
#define MCU (s32)MICROSECS(100) /* Minimum unit */
/* SLAB cache for struct bvt_dom_info objects */
static xmem_cache_t *dom_info_cache;
+/*
+ * Wrappers for run-queue management. Must be called with the run_lock
+ * held.
+ */
+/* Link domain d at the head of the run queue of the CPU it runs on. */
+static inline void __add_to_runqueue_head(struct domain *d)
+{
+ list_add(RUNLIST(d), RUNQUEUE(d->processor));
+}
+
+/* Link domain d at the tail of the run queue of the CPU it runs on. */
+static inline void __add_to_runqueue_tail(struct domain *d)
+{
+ list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
+}
+
+/* Unlink d from its run queue. The next pointer is cleared so that
+ * __task_on_runqueue() can later detect the off-queue state. */
+static inline void __del_from_runqueue(struct domain *d)
+{
+ struct list_head *runlist = RUNLIST(d);
+ list_del(runlist);
+ runlist->next = NULL;
+}
+
+/* Non-zero iff d is currently linked on a run queue; relies on
+ * __del_from_runqueue() NULLing the next pointer on removal. */
+static inline int __task_on_runqueue(struct domain *d)
+{
+ return (RUNLIST(d))->next != NULL;
+}
+
+
/*
* Calculate the effective virtual time for a domain. Take into account
* warping limits
spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
set_bit(DF_RUNNING, &p->flags);
- if ( !__task_on_runqueue(RUNLIST(p)) )
- __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
+ if ( !__task_on_runqueue(p) )
+ __add_to_runqueue_head(p);
spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
/* If on the runqueue already then someone has done the wakeup work. */
- if ( unlikely(__task_on_runqueue(RUNLIST(d))) )
+ if ( unlikely(__task_on_runqueue(d)) )
{
spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
return;
}
- __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(d->processor));
+ __add_to_runqueue_head(d);
now = NOW();
spin_lock_irqsave(&CPU_INFO(d->processor)->run_lock, flags);
- if ( __task_on_runqueue(RUNLIST(d)) )
- __del_from_runqueue(RUNLIST(d));
+ if ( __task_on_runqueue(d) )
+ __del_from_runqueue(d);
spin_unlock_irqrestore(&CPU_INFO(d->processor)->run_lock, flags);
}
ASSERT(prev_inf != NULL);
spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
- ASSERT(__task_on_runqueue(RUNLIST(prev)));
+ ASSERT(__task_on_runqueue(prev));
if ( likely(!is_idle_task(prev)) )
{
__calc_evt(prev_inf);
- __del_from_runqueue(RUNLIST(prev));
+ __del_from_runqueue(prev);
if ( domain_runnable(prev) )
- __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
+ __add_to_runqueue_tail(prev);
}
#define FBVT_INFO(p) ((struct fbvt_dom_info *)(p)->sched_priv)
#define CPU_INFO(cpu) ((struct fbvt_cpu_info *)(schedule_data[cpu]).sched_priv)
-#define RUNLIST(p) (struct list_head *)(&(FBVT_INFO(p)->run_list))
-#define RUNQUEUE(cpu) (struct list_head *)&(CPU_INFO(cpu)->runqueue)
+#define RUNLIST(p) ((struct list_head *)&(FBVT_INFO(p)->run_list))
+#define RUNQUEUE(cpu) ((struct list_head *)&(CPU_INFO(cpu)->runqueue))
#define CPU_SVT(cpu) (CPU_INFO(cpu)->svt)
#define LAST_VTB(cpu) (CPU_INFO(cpu)->vtb)
#define R_TIME(cpu) (CPU_INFO(cpu)->r_time)
/* SLAB cache for struct fbvt_dom_info objects */
static xmem_cache_t *dom_info_cache;
+
+/*
+ * Wrappers for run-queue management. Must be called with the run_lock
+ * held.
+ */
+/* Link domain d at the head of the run queue of the CPU it runs on. */
+static inline void __add_to_runqueue_head(struct domain *d)
+{
+ list_add(RUNLIST(d), RUNQUEUE(d->processor));
+}
+
+/* Link domain d at the tail of the run queue of the CPU it runs on. */
+static inline void __add_to_runqueue_tail(struct domain *d)
+{
+ list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
+}
+
+/* Unlink d from its run queue. The next pointer is cleared so that
+ * __task_on_runqueue() can later detect the off-queue state. */
+static inline void __del_from_runqueue(struct domain *d)
+{
+ struct list_head *runlist = RUNLIST(d);
+ list_del(runlist);
+ runlist->next = NULL;
+}
+
+/* Non-zero iff d is currently linked on a run queue; relies on
+ * __del_from_runqueue() NULLing the next pointer on removal. */
+static inline int __task_on_runqueue(struct domain *d)
+{
+ return (RUNLIST(d))->next != NULL;
+}
+
+
/*
* Calculate the effective virtual time for a domain. Take into account
* warping limits
fbvt_add_task(p);
spin_lock_irqsave(&CPU_INFO(p->processor)->run_lock, flags);
set_bit(DF_RUNNING, &p->flags);
- if ( !__task_on_runqueue(RUNLIST(p)) )
- __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
+ if ( !__task_on_runqueue(p) )
+ __add_to_runqueue_head(p);
spin_unlock_irqrestore(&CPU_INFO(p->processor)->run_lock, flags);
return 0;
spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
/* If on the runqueue already then someone has done the wakeup work. */
- if ( unlikely(__task_on_runqueue(RUNLIST(d))) )
+ if ( unlikely(__task_on_runqueue(d)) )
{
spin_unlock_irqrestore(&CPU_INFO(cpu)->run_lock, flags);
return;
}
- __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(cpu));
+ __add_to_runqueue_head(d);
now = NOW();
/* The runqueue accesses must be protected */
spin_lock_irqsave(&CPU_INFO(d->processor)->run_lock, flags);
- if ( __task_on_runqueue(RUNLIST(d)) )
- __del_from_runqueue(RUNLIST(d));
+ if ( __task_on_runqueue(d) )
+ __del_from_runqueue(d);
spin_unlock_irqrestore(&CPU_INFO(d->processor)->run_lock, flags);
}
spin_lock_irqsave(&CPU_INFO(cpu)->run_lock, flags);
- ASSERT(__task_on_runqueue(RUNLIST(prev)));
+ ASSERT(__task_on_runqueue(prev));
if ( likely(!is_idle_task(prev)) )
{
__calc_evt(prev_inf);
- __del_from_runqueue(RUNLIST(prev));
+ __del_from_runqueue(prev);
if ( domain_runnable(prev) )
- __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
+ __add_to_runqueue_tail(prev);
}
/* We should at least have the idle task */
static spinlock_t run_locks[NR_CPUS];
#define RR_INFO(d) ((struct rrobin_dom_info *)d->sched_priv)
-#define RUNLIST(d) (struct list_head *)&(RR_INFO(d)->run_list)
+#define RUNLIST(d) ((struct list_head *)&(RR_INFO(d)->run_list))
#define RUNQUEUE(cpu) RUNLIST(schedule_data[cpu].idle)
-// TODO remove following line
-static void rr_dump_cpu_state(int cpu);
-
/* SLAB cache for struct rrobin_dom_info objects */
static xmem_cache_t *dom_info_cache;
+/*
+ * Wrappers for run-queue management. Must be called with the run_lock
+ * held.
+ */
+/* Link domain d at the head of the run queue of the CPU it runs on.
+ * NOTE(review): here RUNQUEUE(cpu) is the idle task's RUNLIST, so the
+ * idle task's list node anchors the per-CPU queue. */
+static inline void __add_to_runqueue_head(struct domain *d)
+{
+ list_add(RUNLIST(d), RUNQUEUE(d->processor));
+}
+
+/* Link domain d at the tail of the run queue of the CPU it runs on. */
+static inline void __add_to_runqueue_tail(struct domain *d)
+{
+ list_add_tail(RUNLIST(d), RUNQUEUE(d->processor));
+}
+
+/* Unlink d from its run queue. The next pointer is cleared so that
+ * __task_on_runqueue() can later detect the off-queue state. */
+static inline void __del_from_runqueue(struct domain *d)
+{
+ struct list_head *runlist = RUNLIST(d);
+ list_del(runlist);
+ runlist->next = NULL;
+}
+
+/* Non-zero iff d is currently linked on a run queue; relies on
+ * __del_from_runqueue() NULLing the next pointer on removal. */
+static inline int __task_on_runqueue(struct domain *d)
+{
+ return (RUNLIST(d))->next != NULL;
+}
+
+
/* Ensures proper initialisation of the dom_info */
static void cache_constructor(void *arg1, xmem_cache_t *arg2, unsigned long arg3)
spin_lock_irqsave(&run_locks[p->processor], flags);
set_bit(DF_RUNNING, &p->flags);
- if ( !__task_on_runqueue(RUNLIST(p)) )
- __add_to_runqueue_head(RUNLIST(p), RUNQUEUE(p->processor));
+ if ( !__task_on_runqueue(p) )
+ __add_to_runqueue_head(p);
spin_unlock_irqrestore(&run_locks[p->processor], flags);
return 0;
}
if(!is_idle_task(prev))
{
- __del_from_runqueue(RUNLIST(prev));
+ __del_from_runqueue(prev);
if ( domain_runnable(prev) )
- __add_to_runqueue_tail(RUNLIST(prev), RUNQUEUE(cpu));
+ __add_to_runqueue_tail(prev);
}
spin_unlock_irqrestore(&run_locks[cpu], flags);
- ret.task = list_entry( RUNQUEUE(cpu).next->next,
+ ret.task = list_entry( RUNQUEUE(cpu)->next,
struct rrobin_dom_info,
run_list)->domain;
ret.time = rr_slice;
else
{
spin_lock_irqsave(&run_locks[d->processor], flags);
- if ( __task_on_runqueue(RUNLIST(d)) )
- __del_from_runqueue(RUNLIST(d));
+ if ( __task_on_runqueue(d) )
+ __del_from_runqueue(d);
spin_unlock_irqrestore(&run_locks[d->processor], flags);
}
}
spin_lock_irqsave(&run_locks[cpu], flags);
/* If on the runqueue already then someone has done the wakeup work. */
- if ( unlikely(__task_on_runqueue(RUNLIST(d))))
+ if ( unlikely(__task_on_runqueue(d)))
{
spin_unlock_irqrestore(&run_locks[cpu], flags);
return;
}
- __add_to_runqueue_head(RUNLIST(d), RUNQUEUE(cpu));
+ __add_to_runqueue_head(d);
spin_unlock_irqrestore(&run_locks[cpu], flags);
now = NOW();
/* per CPU scheduler information */
extern schedule_data_t schedule_data[];
-/*
- * Wrappers for run-queue management. Must be called with the schedule_lock
- * held.
- */
-static inline void __add_to_runqueue_head(struct list_head *run_list, struct list_head *runqueue)
-{
- list_add(run_list, runqueue);
-}
-
-static inline void __add_to_runqueue_tail(struct list_head *run_list, struct list_head *runqueue)
-{
- list_add_tail(run_list, runqueue);
-}
-
-static inline void __del_from_runqueue(struct list_head *run_list)
-{
- list_del(run_list);
- run_list->next = NULL;
-}
-
-static inline int __task_on_runqueue(struct list_head *run_list)
-{
- return run_list->next != NULL;
-}